In [1]:
from pliers.tests.utils import get_test_data_path
from os.path import join
from pliers.extractors import (ClarifaiAPIImageExtractor,
                               IndicoAPIImageExtractor,
                               GoogleVisionAPILabelExtractor)
from pliers.stimuli.image import ImageStim
from pliers.graph import Graph
In [2]:
# Load the stimulus
stim_path = join(get_test_data_path(), 'image', 'apple.jpg')
stim = ImageStim(stim_path)
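As a quick sanity check, the loaded stimulus can be inspected directly. A minimal sketch, assuming the standard pliers ImageStim interface, in which .data holds the image as a numpy array and .name holds the filename:
In [ ]:
# Inspect the loaded stimulus (assumes ImageStim exposes the image
# as a numpy array via .data and the filename via .name)
print(stim.name)
print(stim.data.shape)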
In [3]:
# Configure extractions
clarifai_ext = ClarifaiAPIImageExtractor()
indico_ext = IndicoAPIImageExtractor(models=['image_recognition'])
google_ext = GoogleVisionAPILabelExtractor()
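All three extractors call remote services, so valid credentials must be available before transform() is invoked. A minimal sketch, assuming the environment-variable names used by recent pliers versions (the exact names are an assumption here; check the pliers documentation for your version):
In [ ]:
import os
# Assumed environment variable names; consult the pliers docs for the
# exact names your version expects before relying on these.
os.environ['CLARIFAI_API_KEY'] = 'YOUR_CLARIFAI_KEY'
os.environ['INDICO_APP_KEY'] = 'YOUR_INDICO_KEY'
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = '/path/to/service-account.json'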
In [4]:
# Run extractions
clarifai_res = clarifai_ext.transform(stim)
indico_res = indico_ext.transform(stim)
google_res = google_ext.transform(stim)
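Each transform() call returns an ExtractorResult. Besides inspecting them one at a time, as below, the three results can be combined into a single DataFrame with pliers' merge_results utility. A sketch (the exact output layout depends on the pliers version):
In [ ]:
from pliers.extractors import merge_results
# Combine the three ExtractorResult objects into one DataFrame
# for side-by-side comparison of the APIs' labels.
merged = merge_results([clarifai_res, indico_res, google_res])
merged.head()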
In [5]:
# Display the Clarifai labels as a DataFrame
clarifai_res.to_df()
Out[5]:
In [6]:
# Keep only the Indico labels whose probability exceeds 0.5
df = indico_res.to_df()
df.loc[:, df.sum() > 0.5]
Out[6]:
In [7]:
# Display the Google Vision labels as a DataFrame
google_res.to_df()
Out[7]:
For the apple image, the Google and Clarifai APIs clearly perform best: both return "apple", "food", and "fruit" among their top features. By contrast, the only Indico label with a probability above 0.5 is "pomegranate". The Google API also appears less noisy than the Clarifai API, which assigns probabilities above 0.9 to several object labels.
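Finally, note that Graph was imported at the top but not used above. The same comparison can be expressed more compactly as a pliers Graph, which runs every extractor and merges the results in a single call. A minimal sketch, assuming the default merge behavior of Graph.transform:
In [ ]:
# Build a graph whose nodes are the three extractors and run it in one pass
graph = Graph([clarifai_ext, indico_ext, google_ext])
merged_df = graph.transform(stim)
merged_df.head()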